# Required Packages
import pandas as pd
import numpy as np
import pickle
# Modeling
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import train_test_split, cross_val_score, KFold, StratifiedShuffleSplit, ShuffleSplit
from sklearn import preprocessing
# Visualisation libraries
## Progress Bar
import progressbar
## Text
from colorama import Fore, Back, Style
from IPython.display import Image, display, Markdown, Latex, clear_output
## plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.offline as py
from plotly.subplots import make_subplots
import plotly.express as px
## seaborn
import seaborn as sns
# Global seaborn theme: white grid background, paper-sized fonts.
sns.set_style("whitegrid")
sns.set_context("paper", rc={"font.size":12,"axes.titlesize":14,"axes.labelsize":12})
## matplotlib
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Ellipse, Polygon
import matplotlib.gridspec as gridspec
import matplotlib.colors
from pylab import rcParams
plt.style.use('seaborn-whitegrid')
import matplotlib as mpl
# Default figure size and font sizes for all matplotlib plots in this notebook.
mpl.rcParams['figure.figsize'] = (17, 6)
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['text.color'] = 'k'
# Notebook magic: render matplotlib figures inline.
%matplotlib inline
import warnings
# NOTE(review): suppresses ALL warnings globally, including deprecation
# notices that may hide real problems.
warnings.filterwarnings("ignore")
![]()
In this article, we investigate a simulated dataset that mimics customer behavior on the Starbucks rewards mobile app. Starbucks tends to send out offers to users of the mobile app once every few days. These offers are exclusive; that is, not all users receive the same offer. An offer can contain a discount for their products or sometimes a BOGO (buy one get one free) deal. These offers have a validity period before the offer expires. The article here is inspired by a towardsdatascience.com article.
# Target: column names of the binary offer-type labels in the user dataset.
Target = ['BOGO offer', 'Disc offer','Info offer']
# Labels: human-readable names for the same offer types, used in reports/plots.
Labels = ['BOGO Offers', 'Discount Offers','Informational Offers']
def Header(Text, L = 100, C = 'Blue', T = 'White'):
    """Print `Text` highlighted on a colored background, followed by a
    '=' rule padding the line out to width L.

    C is the background color of the text (and the color of the rule);
    T is the foreground (text) color.
    """
    back_map = {'Black': Back.BLACK, 'Red': Back.RED, 'Green': Back.GREEN,
                'Yellow': Back.YELLOW, 'Blue': Back.BLUE,
                'Magenta': Back.MAGENTA, 'Cyan': Back.CYAN}
    fore_map = {'Black': Fore.BLACK, 'Red': Fore.RED, 'Green': Fore.GREEN,
                'Yellow': Fore.YELLOW, 'Blue': Fore.BLUE,
                'Magenta': Fore.MAGENTA, 'Cyan': Fore.CYAN, 'White': Fore.WHITE}
    # Rule length: total width minus the text and the single separator space.
    rule = '=' * (L - len(Text) - 1)
    highlighted = back_map[C] + fore_map[T] + Style.NORMAL + Text + Style.RESET_ALL
    trailer = fore_map[C] + Style.NORMAL + rule + Style.RESET_ALL
    print(highlighted + ' ' + trailer)
def Line(L=100, C = 'Blue'):
    """Print a horizontal '=' rule of length L in color C."""
    palette = {'Black': Fore.BLACK, 'Red': Fore.RED, 'Green': Fore.GREEN,
               'Yellow': Fore.YELLOW, 'Blue': Fore.BLUE,
               'Magenta': Fore.MAGENTA, 'Cyan': Fore.CYAN, 'White': Fore.WHITE}
    print(palette[C] + Style.NORMAL + '=' * L + Style.RESET_ALL)
# User_Data Dataset
# Load the pre-standardized user feature table and drop columns that are
# not used downstream (completion counts, reward totals, difficulty).
Header('Standardized User Dataset:')
User_Data = pd.read_csv('StarBucks/User_Data_STD.csv')
User_Data = User_Data.drop(['No Offer','BOGO comp','Info comp','Disc comp', 'Tot Rewards Rec','Offer Difficulty'], axis=1)
display(User_Data.head(8).style.hide_index())
# Dictionaries
# Feat_Dict maps raw column names to display names; used to re-label
# features/offers in tables and plots below.
with open('StarBucks/Feat_Dict.pkl', 'rb') as fp:
    Feat_Dict = pickle.load(fp)
Standardized User Dataset: =========================================================================
| ID | BOGO Offer Rec | Disc Offer Rec | Info Offer Rec | Tot Tran Cnt | Tot Tran Amnt | Ave Tran Amnt | Offer Viewed | Offer Completed | Offer Trans Amnt | BOGO offer | Disc offer | Info offer | Offer Tran Cnt Ratio | Offer Trans Amnt Ratio | Offer Comp View Ratio | Offer Comp Rec Ratio | Tran Amnt per Offer | Reward per Offer | Difficulty per Offer | Age | Income | Member Tenure | Gender Female | Gender Male | Gender Other |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 68be06ca386d4c31939f3a4f0e3dd783 | -1.601193 | 2.831781 | -1.024275 | 0.161509 | -0.667401 | -0.692193 | 0.617188 | 0.180208 | -0.322012 | 0.000000 | 1.000000 | 0.000000 | -0.070352 | 1.913865 | -0.126536 | 0.033686 | -0.259408 | -0.398152 | 0.777036 | 0.035510 | -0.060733 | 0.059884 | -0.750862 | -0.998119 | 2.474249 |
| 0610b486422d4921ae7d2bf64640c50b | -0.708695 | -1.588245 | 0.118664 | -1.011260 | -0.217826 | 0.770681 | -2.274659 | -1.223472 | -0.610991 | 0.000000 | 0.000000 | 0.000000 | -0.998783 | -1.175801 | -1.333457 | -1.272517 | -0.607588 | -0.947462 | -1.223589 | 0.035510 | 2.318549 | -0.309958 | 1.331803 | -0.998119 | -0.404163 |
| 38fe809add3b4fcf9315a9694bb96ff5 | -0.708695 | -1.588245 | 0.118664 | -0.424876 | -0.715844 | -0.684942 | -1.696290 | -1.223472 | -0.610991 | 0.000000 | 0.000000 | 0.000000 | -0.998783 | -1.175801 | -1.333457 | -1.272517 | -0.607588 | -0.947462 | -1.223589 | 0.035510 | -0.060733 | -1.197578 | -0.750862 | -0.998119 | 2.474249 |
| 78afa995795e4d85b5d9ceeca43f5fef | 1.076299 | -1.588245 | 0.118664 | -0.229414 | 0.435452 | 0.588345 | 0.617188 | 0.882048 | 0.873883 | 1.000000 | 0.000000 | 1.000000 | 0.815877 | 0.848463 | 0.476925 | 1.176614 | 0.584321 | 0.518098 | -0.046751 | 1.272409 | 1.723728 | -0.162021 | 1.331803 | -0.998119 | -0.404163 |
| a03223e636434f42ac4c3df47e8bac43 | -1.601193 | 1.063770 | 1.261603 | -1.011260 | -0.792481 | -0.737011 | 0.617188 | -1.223472 | -0.610991 | 0.000000 | 0.000000 | 0.000000 | -0.998783 | -1.175801 | -1.333457 | -1.272517 | -0.607588 | -0.947462 | -1.223589 | 0.035510 | -0.060733 | -0.383926 | -0.750862 | -0.998119 | 2.474249 |
| e2127556f4f64592b11af22de27a7932 | -0.708695 | 0.179765 | 0.118664 | -1.011260 | -0.370940 | 0.368942 | -0.539551 | 0.180208 | -0.009862 | 1.000000 | 1.000000 | 0.000000 | 1.828710 | 1.097057 | 0.678078 | 0.360237 | 0.116282 | -0.178428 | 0.541668 | 0.839494 | 0.236677 | -1.049641 | -0.750862 | 1.001884 | -0.404163 |
| 8ec6ce2a7e7949b1bf142def7d0e0586 | -0.708695 | 1.063770 | 0.118664 | -1.597644 | -0.829410 | -0.833898 | 1.195558 | -1.223472 | -0.610991 | 0.000000 | 0.000000 | 0.000000 | -0.998783 | -1.175801 | -1.333457 | -1.272517 | -0.607588 | -0.947462 | -1.223589 | 0.035510 | -0.060733 | -0.531863 | -0.750862 | -0.998119 | 2.474249 |
| 68617ca6246f4fbc85e91a2a49552598 | 1.076299 | 0.179765 | -1.024275 | -1.206721 | -0.827504 | -0.826397 | 0.038819 | -1.223472 | -0.610991 | 0.000000 | 0.000000 | 0.000000 | -0.998783 | -1.175801 | -1.333457 | -1.272517 | -0.607588 | -0.947462 | -1.223589 | 0.035510 | -0.060733 | -0.531863 | -0.750862 | -0.998119 | 2.474249 |
# Offer-type distribution: summary table (left) + donut chart (right).
Table = User_Data[Target].sum(axis = 0).astype(int).to_frame('Count').reset_index(drop = False).\
        rename(columns = {'index':'Offers'})
Table['Offers'] = Table['Offers'].replace(Feat_Dict)
Table['Percentage'] = np.round(100*(Table['Count']/Table['Count'].sum()),2)
fig = make_subplots(rows=1, cols=2, horizontal_spacing = 0.02, column_widths=[0.6, 0.4],
                    specs=[[{"type": "table"},{"type": "pie"}]])
# FIX: 'RoyalrBlue' was a typo — not a valid CSS color name.
PieColors = ['SeaGreen', 'FireBrick', 'RoyalBlue']
TableColors = ['DimGray','White']
# Right: donut chart of offer counts.
fig.add_trace(go.Pie(labels=Table['Offers'].values, values=Table['Count'].values, pull=[.01, .01, .01], textfont=dict(size=16),
                     marker=dict(colors = PieColors, line=dict(color='black', width=1))), row=1, col=2)
fig.update_traces(hole=.5)
fig.update_layout(height = 400, legend=dict(orientation="v"), legend_title_text= 'Offers',
                  annotations=[dict(text= '<b>' + 'Offers' + '<b>', x=0.835, y=0.5, font_size=14, showarrow=False)])
# Left: table of counts and percentages (percentage rendered with 2 decimals).
T = Table.copy()
T['Percentage'] = T['Percentage'].map(lambda x: '%.2f' % x)
# go.Table wants one list of cell values per column.
Temp = [T.loc[:, col].values for col in T.columns]
fig.add_trace(go.Table(header=dict(values = list(Table.columns), line_color='darkslategray',
                                   fill_color= TableColors[0], align=['center','center'],
                                   font=dict(color='white', size=12), height=25), columnwidth = [0.4, 0.2, 0.2],
                       cells=dict(values=Temp, line_color='darkslategray',
                                  fill=dict(color= [TableColors[1], TableColors[1]]),
                                  align=['center', 'center'], font_size=12, height=20)), 1, 1)
# FIX: title previously concatenated to "OffersDistribution" (missing space).
fig.update_layout(title={'text': '<b>' + 'Offers Distribution' + '<b>', 'x':0.5,
                         'y':0.90, 'xanchor': 'center', 'yanchor': 'top'})
fig.show()
del Table, PieColors, TableColors
First off, let's define $X$ and $y$ sets.
# Features X: every column except the three targets and the user ID.
Temp = Target.copy()
Temp.append('ID')
X = User_Data.drop(columns = Temp)
# Labels y: the three binary offer-type columns, cast to int.
y = User_Data[list(Target)].astype(int)
del Temp
StratifiedShuffleSplit returns stratified randomized splits: each split contains approximately the same percentage of samples of each target class as the complete set.
# Hold out 30% of the users as a test set, stratified on the targets.
Test_Size = 0.3
sss = StratifiedShuffleSplit(n_splits=1, test_size=Test_Size, random_state=42)
_ = sss.get_n_splits(X, y)
# n_splits=1, so this loop runs once.  NOTE: train_index/test_index are
# reused later in the notebook to re-attach the 'ID' column.
for train_index, test_index in sss.split(X, y):
    X_train, X_test = X.loc[train_index], X.loc[test_index]
    y_train, y_test = y.loc[train_index], y.loc[test_index]
del sss
# Side-by-side donut charts of the offer distribution in train vs. test sets,
# to visually confirm the stratified split preserved class proportions.
# FIX: 'RoyalrBlue' was a typo — not a valid CSS color name.
Colors = ['SeaGreen', 'FireBrick', 'RoyalBlue']
fig = make_subplots(rows=1, cols=2, specs=[[{'type':'domain'}]*2])
# Train-set offer counts.
Table = y_train.sum(axis = 0).astype(int).to_frame('Count').reset_index(drop = False).\
        rename(columns = {'index':'Offers'})
Table['Offers'] = Table['Offers'].replace(Feat_Dict)
fig.add_trace(go.Pie(labels=Table['Offers'].values,
                     values=Table['Count'].values,
                     pull=[.01, .01, .01],
                     name= 'Train Set',
                     textfont=dict(size=16),
                     marker= dict(colors = Colors, line=dict(color='black', width=1))), 1, 1)
# Test-set offer counts.
Table = y_test.sum(axis = 0).astype(int).to_frame('Count').reset_index(drop = False).\
        rename(columns = {'index':'Offers'})
Table['Offers'] = Table['Offers'].replace(Feat_Dict)
fig.add_trace(go.Pie(labels=Table['Offers'].values,
                     values=Table['Count'].values,
                     pull=[.01, .01, .01],
                     name= 'Test Set',
                     textfont=dict(size=16),
                     marker= dict(colors = Colors, line=dict(color='black', width=1))), 1, 2)
fig.update_traces(hole=.4)
fig.update_layout(height = 400, legend=dict(orientation="v"),
                  legend_title_text= 'Offers',
                  annotations=[dict(text= '<b>' + 'Train<br>Set' + '<b>', x=0.195, y=0.5, font_size=14, showarrow=False),
                               dict(text= '<b>' + 'Test<br>Set' + '<b>', x=0.8, y=0.5, font_size=14, showarrow=False)],
                  title={'text': '<b>' + 'Train and Test Sets' + '<b>', 'x':0.48, 'y': .83,
                         'xanchor': 'center', 'yanchor': 'top'})
fig.show()
del Table, Colors
Here we use the sklearn MultiOutputClassifier with RandomForestClassifier for our modeling.
def Stratified_CV_Scoring(model, X = X, y = y, n_splits = 10):
    """Evaluate `model` over repeated stratified shuffle splits.

    For each of `n_splits` stratified train/test splits (test fraction
    `Test_Size`, fixed random_state for reproducibility), the model is
    re-fitted and a classification report is computed on both the train
    and the test part.  The per-split reports are then reduced to
    'mean ± std' strings.

    Parameters
    ----------
    model : estimator with fit/predict (e.g. the MultiOutputClassifier).
    X, y : DataFrame or ndarray; defaults bind the module-level X and y.
    n_splits : number of shuffle-split repetitions.

    Returns
    -------
    (Reports_Train, Reports_Test) : two DataFrames of formatted
    'mean ± std' classification-report entries.
    """
    def _summarize(reports, template, col_name):
        # Stack the raveled per-split report matrices and reduce to
        # mean/std, formatted as 'mean ± std' strings.  np.stack keeps a
        # 2-D array even for a single split (the old vstack-based code
        # degenerated to 1-D there).
        stacked = np.stack([r.ravel() for r in reports])
        shape = template.shape
        Mean = pd.DataFrame(stacked.mean(axis = 0).reshape(shape),
                            index = template.index, columns = template.columns)
        STD = pd.DataFrame(stacked.std(axis = 0).reshape(shape),
                           index = template.index, columns = template.columns)
        out = Mean.applymap(lambda x: ('%.4f' % x)) + ' ± ' + STD.applymap(lambda x: ('%.4f' % x))
        return out.reset_index().rename(columns = {'index': col_name})

    sss = StratifiedShuffleSplit(n_splits = n_splits, test_size=Test_Size, random_state=42)
    if isinstance(X, pd.DataFrame):
        X = X.values
    if isinstance(y, pd.DataFrame):
        y = y.values
    _ = sss.get_n_splits(X, y)
    Reports_Train = []
    Reports_Test = []
    R = None  # keeps the last report DataFrame as a shape/index template
    for train_index, test_index in sss.split(X, y):
        X_tr, X_te = X[train_index], X[test_index]
        y_tr, y_te = y[train_index], y[test_index]
        _ = model.fit(X_tr, y_tr)
        # Train-set report
        y_pred = model.predict(X_tr)
        R = pd.DataFrame(metrics.classification_report(y_tr, y_pred, target_names=Labels, output_dict=True)).T
        Reports_Train.append(R.values)
        # Test-set report
        y_pred = model.predict(X_te)
        R = pd.DataFrame(metrics.classification_report(y_te, y_pred, target_names=Labels, output_dict=True)).T
        Reports_Test.append(R.values)
    Reports_Train = _summarize(Reports_Train, R, 'Train Set (CV = % i)' % n_splits)
    Reports_Test = _summarize(Reports_Test, R, 'Test Set (CV = % i)' % n_splits)
    return Reports_Train, Reports_Test
# Random Forest Classifier using 100 estimators
# One independent forest is fitted per target column (multi-output wrapper).
RFC = MultiOutputClassifier(RandomForestClassifier(n_estimators=100, oob_score=True))
_ = RFC.fit(X_train, y_train)
# Cross-validated scoring over 20 stratified shuffle splits.
n_splits = 20
Reports_Train, Reports_Test = Stratified_CV_Scoring(RFC, X = X, y = y, n_splits = n_splits)
# Styled display: highlight the label column of each report.
display(Reports_Train.style.hide_index().set_properties(**{'background-color': 'HoneyDew', 'color': 'Black'}).\
        set_properties(subset=['Train Set (CV = % i)' % n_splits], **{'background-color': 'SeaGreen', 'color': 'White'}))
display(Reports_Test.style.hide_index().set_properties(**{'background-color': 'Azure', 'color': 'Black'}).\
        set_properties(subset=['Test Set (CV = % i)' % n_splits], **{'background-color': 'RoyalBlue', 'color': 'White'}))
| Train Set (CV = 20) | precision | recall | f1-score | support |
|---|---|---|---|---|
| BOGO Offers | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 5254.0000 ± 0.0000 |
| Discount Offers | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 5996.0000 ± 0.0000 |
| Informational Offers | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 3487.0000 ± 0.0000 |
| micro avg | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 14737.0000 ± 0.0000 |
| macro avg | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 14737.0000 ± 0.0000 |
| weighted avg | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 1.0000 ± 0.0000 | 14737.0000 ± 0.0000 |
| samples avg | 0.7629 ± 0.0000 | 0.7629 ± 0.0000 | 0.7629 ± 0.0000 | 14737.0000 ± 0.0000 |
| Test Set (CV = 20) | precision | recall | f1-score | support |
|---|---|---|---|---|
| BOGO Offers | 0.9257 ± 0.0050 | 0.9783 ± 0.0031 | 0.9513 ± 0.0027 | 2252.0000 ± 0.0000 |
| Discount Offers | 0.9574 ± 0.0039 | 0.9768 ± 0.0032 | 0.9670 ± 0.0018 | 2571.0000 ± 0.0000 |
| Informational Offers | 0.9631 ± 0.0041 | 0.9743 ± 0.0043 | 0.9687 ± 0.0031 | 1493.0000 ± 0.0000 |
| micro avg | 0.9471 ± 0.0027 | 0.9767 ± 0.0020 | 0.9617 ± 0.0014 | 6316.0000 ± 0.0000 |
| macro avg | 0.9487 ± 0.0026 | 0.9765 ± 0.0021 | 0.9623 ± 0.0015 | 6316.0000 ± 0.0000 |
| weighted avg | 0.9474 ± 0.0027 | 0.9767 ± 0.0020 | 0.9618 ± 0.0014 | 6316.0000 ± 0.0000 |
| samples avg | 0.7277 ± 0.0016 | 0.7447 ± 0.0013 | 0.7317 ± 0.0011 | 6316.0000 ± 0.0000 |
# Feature importances: one row per (feature, target) from each per-target
# forest, plus an 'Overall' mean across the three targets.
# FIX: DataFrame.append was deprecated and removed in pandas 2.0; collect
# the per-target frames in a list and concatenate once.
Frames = []
for i in range(len(Target)):
    Temp = pd.DataFrame(data = RFC.estimators_[i].feature_importances_, index = X_train.columns, columns = ['Importance'])
    Temp['Target'] = Target[i]
    Frames.append(Temp)
Results = pd.concat(Frames)
del Frames, Temp
Results = Results.reset_index(drop = False).rename(columns = {'index':'Features'})
# 'Overall' importance = mean importance of each feature across targets.
Temp = pd.pivot_table(Results, values='Importance', index=['Features'], aggfunc=np.mean, fill_value=0).reset_index(drop = False)
Temp['Target'] = 'Overall'
Results = pd.concat([Results, Temp])
del Temp
Results['Features'] = Results['Features'].replace(Feat_Dict)
# Grouped bar chart: one color per target plus the overall mean.
Colors = ['LightBlue', 'DeepSkyBlue', 'CornFlowerBlue', 'OrangeRed']
fig = px.bar(Results, x='Features', y='Importance', orientation='v',
             color = 'Target', color_discrete_sequence= Colors, barmode='group', height= 600)
fig.update_traces(marker_line_color= 'navy', marker_line_width=0.8, opacity=1)
fig.update_xaxes(showline=True, linewidth=1, linecolor='Lightgray', mirror=True,
                 zeroline=False, zerolinewidth=1, zerolinecolor='Black',
                 showgrid=False, gridwidth=1, gridcolor='Lightgray')
fig.update_yaxes(showline=True, linewidth=1, linecolor='Lightgray', mirror=True,
                 zeroline=True, zerolinewidth=1, zerolinecolor='Black',
                 showgrid=True, gridwidth=1, gridcolor='Lightgray', range= [0, .3])
fig.update_layout(legend_orientation='h', plot_bgcolor= 'white',
                  legend_title_text=None, legend=dict(x=0,y=1.1, bordercolor="Black", borderwidth=1))
fig.update_layout(plot_bgcolor= 'white', width = 900,
                  title={'text': '<b>' + 'Classification Feature Importance' + '<b>',
                         'x':0.5, 'y':1, 'xanchor': 'center', 'yanchor': 'top'})
fig.show()
Our model can predict multiple offer types for a given customer, and then sort the recommendations based on a higher probability of conversion. Then, we can find the most suitable offer for the customer.
# Re-attach the user 'ID' column (using the split indices from above) and
# make it the index, so users can be looked up by ID in the helpers below.
X_train = User_Data.loc[train_index,['ID']].join(X_train).set_index('ID')
# User_Data.loc[train_index,['ID']].join(y_train)
X_test = User_Data.loc[test_index,['ID']].join(X_test).set_index('ID')
# User_Data.loc[test_index,['ID']].join(y_test)
def Best_Offer (ID, Model = RFC, X = X_test):
    """Recommend offers for the users in `ID`, ranked by predicted
    conversion probability (highest first).

    Returns a DataFrame with one row per user: the ID and either a list
    of recommended offer names or the string 'No offer is recommended'
    when the model predicts no offer type for that user.
    """
    # NOTE(review): Target entries end in ' offer' (lowercase), so this
    # replace of ' Offers' never matches and the names pass through as-is.
    offer_names = [x.replace(' Offers','') for x in Target]
    # One (n_users, n_classes) probability array per offer type.
    proba_per_offer = Model.predict_proba(X.loc[ID])
    # Hard 0/1 predictions, shape (n_users, n_offers).
    hard_pred = Model.predict(X.loc[ID])
    # Positive-class probability for every (user, offer) pair.
    # assumes each per-offer classifier saw both classes, so column index 1
    # exists — TODO confirm
    pos_proba = np.array([[proba_per_offer[c][u][1] for c in range(len(offer_names))]
                          for u in range(len(ID))])
    recommendations = []
    for u in range(len(pos_proba)):
        if hard_pred[u].sum() > 0:
            # Offers the model predicted (value 1), ranked by descending
            # positive probability.
            predicted_ids = np.argwhere(hard_pred[u]==1).flatten()
            ranked = [i for i in np.argsort(-pos_proba[u]) if i in predicted_ids]
            recommendations.append([offer_names[i] for i in ranked])
        else:
            recommendations.append('No offer is recommended')
    return pd.DataFrame(data={'ID': ID, 'Most Suitable Offers': recommendations})
def Estimated_Probabilities(ID, Model = RFC, X = X_test):
    """Return the predicted class probabilities for each user in `ID`.

    The result has a two-level column header: the offer name
    ('<target> Probability') on top and the class label (0/1) below,
    with the user 'ID' inserted as the first column.

    FIX: predict_proba was previously called once per target inside the
    loop (len(Target) full predictions); it is now computed once.
    The hard-coded `[0, 1]*3` is generalized to len(Target).
    """
    # One predict_proba call; returns a list with one per-target array.
    # assumes each per-target classifier outputs exactly two probability
    # columns (classes 0 and 1) — TODO confirm
    proba = Model.predict_proba(X.loc[ID])
    Prop = pd.concat([pd.DataFrame(proba[i]) for i in range(len(Target))], axis =1)
    # Top header level: each offer name repeated for its two class columns.
    names = [x + ' Probability' for x in Target]
    top = [name for name in names for _ in range(2)]
    header = [np.array(top, dtype = str), np.array([0, 1]*len(Target))]
    Prop = pd.DataFrame(Prop.values, columns = header)
    Prop.insert(0, 'ID', ID, allow_duplicates=False)
    return Prop
For example, consider a random list of ten customers. We have,
# Demo: recommend offers for n randomly sampled test-set users.
n=10
ID = np.random.choice(X_test.index, n)
# Ranked offer recommendations per user.
display(Best_Offer(ID).style.hide_index().set_properties(**{'background-color': 'HoneyDew', 'color': 'Black'}).\
        set_properties(subset=['ID'], **{'background-color': 'SeaGreen', 'color': 'White'}))
# Underlying class probabilities for the same users.
display(Estimated_Probabilities(ID).style.hide_index().set_precision(2).\
        set_properties(**{'background-color': 'Linen', 'color': 'Black'}).\
        set_properties(subset=['ID'], **{'background-color': 'DarkRed', 'color': 'White'}))
| ID | Most Suitable Offers |
|---|---|
| 2740d12ff4d046c0a45a27652d85963d | ['BOGO offer'] |
| cda8ba00df9e49c3a3a5d318ee4ce9e0 | ['Info offer', 'Disc offer'] |
| 55c69bafc66d4bf6a7df7f1f752c1b38 | ['BOGO offer', 'Disc offer'] |
| 81837e891cb6445894f7cf90e36d6e2e | ['BOGO offer'] |
| 31e915c24163436790b97c1d45b545f6 | ['BOGO offer', 'Disc offer'] |
| b50d7e5567f14070bd95e2c2a45e60f4 | No offer is recommended |
| 30a7268e872746cc8d26a28faa8cec16 | ['Disc offer', 'Info offer'] |
| c964ceaded154e1abbd710096f4d4f51 | No offer is recommended |
| ceb84b3f2d67491486ddd9c0295c382c | ['Disc offer'] |
| 193c5e1694774171aed21630226e1147 | No offer is recommended |
| ID | BOGO offer Probability | Disc offer Probability | Info offer Probability | |||
|---|---|---|---|---|---|---|
| 0 | 1 | 0 | 1 | 0 | 1 | |
| 2740d12ff4d046c0a45a27652d85963d | 0.12 | 0.88 | 0.92 | 0.08 | 1.00 | 0.00 |
| cda8ba00df9e49c3a3a5d318ee4ce9e0 | 0.98 | 0.02 | 0.04 | 0.96 | 0.00 | 1.00 |
| 55c69bafc66d4bf6a7df7f1f752c1b38 | 0.03 | 0.97 | 0.11 | 0.89 | 0.99 | 0.01 |
| 81837e891cb6445894f7cf90e36d6e2e | 0.11 | 0.89 | 0.85 | 0.15 | 1.00 | 0.00 |
| 31e915c24163436790b97c1d45b545f6 | 0.00 | 1.00 | 0.05 | 0.95 | 0.99 | 0.01 |
| b50d7e5567f14070bd95e2c2a45e60f4 | 1.00 | 0.00 | 1.00 | 0.00 | 1.00 | 0.00 |
| 30a7268e872746cc8d26a28faa8cec16 | 0.62 | 0.38 | 0.00 | 1.00 | 0.24 | 0.76 |
| c964ceaded154e1abbd710096f4d4f51 | 1.00 | 0.00 | 1.00 | 0.00 | 1.00 | 0.00 |
| ceb84b3f2d67491486ddd9c0295c382c | 0.84 | 0.16 | 0.09 | 0.91 | 1.00 | 0.00 |
| 193c5e1694774171aed21630226e1147 | 1.00 | 0.00 | 1.00 | 0.00 | 1.00 | 0.00 |